This code produces the results for the question, “Is there stability across caregivers, regardless of activity?” Analyses are conducted separately per language, and separately for each level of all other child-centered activities (see pre-registration here: https://osf.io/byjfg/).

Load libraries

library(tidyverse)
library(GGally)
library(ppcor)
library(psych)
library(Hmisc)
library(sjPlot)

# https://github.com/ggobi/ggally/issues/139
my_custom_smooth <- function(data, mapping, ...) {
  # Lower-panel helper for GGally::ggpairs: scatterplot with a linear fit.
  # Extra arguments in ... are forwarded to geom_smooth().
  base_plot <- ggplot(data = data, mapping = mapping)
  base_plot +
    geom_point(alpha = .4, color = I("black")) +
    geom_smooth(method = "lm", color = I("blue"), ...)
}

Read in data and demographic information

# NOTE about periods of non-tCDS
# gemods refers to when there are designated start/end periods of other-directed speech (ODS); this was captured using gems (@G) using CHAT conventions
# kwalods refers to when ODS was transcribed at an utterance-level within a tCDS activity period between caregiver and child (e.g., other-directed speech in the background); this was captured per utterances using CHAT postcodes
## for tokens/min and types/min, we do not include ODS that occurred within a period of tCDS, because durations were captured by activity and not by utterance
## for mlu, we include all ODS across gemods and kwalods


# NOTE about speech == "all"
# "speech" includes two levels: all, spont
# all = refers to all speech by caregivers
# spont = refers to only speech by caregivers that was considered spontaneous rather than recited (e.g., reading book text, singing memorized common songs like itsy bitsy spider); therefore, 'spont' is a subset of 'all'


# freq
freq <- read_csv("./data_demo_lena_transcripts/freq.csv") %>% 
  # keep all caregiver speech; drop utterance-level ODS (durations are
  # captured per activity, not per utterance)
  filter(activity != "kwalods", speech == "all") %>% 
  mutate(
    # gem-delimited ODS periods become the single non-tCDS category
    activity = recode(activity, "gemods" = "non_tcds"),
    id = factor(id),
    language = factor(language),
    activity = factor(activity, levels = c("books", "play", "food",
                                           "routines", "conv", "ac", "non_tcds"))
  )



# mlu
mlu <- read_csv("./data_demo_lena_transcripts/mlu.csv") %>% 
  # keep all caregiver speech; for MLU both gem- and utterance-level ODS
  # are retained (already combined upstream into "ods")
  filter(speech == "all") %>% 
  mutate(
    activity = recode(activity, "ods" = "non_tcds"),
    id = factor(id),
    language = factor(language),
    activity = factor(activity, levels = c("books", "play", "food",
                                           "routines", "conv", "ac", "non_tcds"))
  )


# chip
# this includes only caregivers, therefore there is no speaker column
# we exclude periods of ODS because this is about responsiveness to the child during periods of tCDS
chip <- read_csv("./data_demo_lena_transcripts/chip.csv") %>% 
  # ODS periods are removed entirely (responsiveness is about tCDS)
  filter(activity != "ods") %>% 
  mutate(
    id = factor(id),
    language = factor(language),
    # NOTE(review): the "non_tcds" level is empty in chip because ODS rows
    # are dropped rather than recoded — kept for level-order consistency
    # with freq/mlu
    activity = factor(activity, levels = c("books", "play", "food",
                                           "routines", "conv", "ac", "non_tcds"))
  )
  
  

# Sanity-check parsed column types and factor levels for each dataset
str(freq)
## spec_tbl_df[,13] [3,308 × 13] (S3: spec_tbl_df/tbl_df/tbl/data.frame)
##  $ X1           : num [1:3308] 1 2 3 4 5 6 7 8 9 10 ...
##  $ id           : Factor w/ 90 levels "7292","7352",..: 47 47 47 47 50 50 52 52 52 52 ...
##  $ rectime      : num [1:3308] 11923 11923 31360 31360 21499 ...
##  $ activity     : Factor w/ 7 levels "books","play",..: 1 1 1 1 1 1 1 1 1 1 ...
##  $ speaker      : chr [1:3308] "CHI" "ADULTS" "CHI" "ADULTS" ...
##  $ tokens       : num [1:3308] 30 151 35 143 58 588 42 286 33 152 ...
##  $ types        : num [1:3308] 17 70 17 65 17 199 19 53 17 59 ...
##  $ segment_num  : num [1:3308] 12 12 15 15 2 2 11 11 5 5 ...
##  $ language     : Factor w/ 2 levels "english","spanish": 1 1 1 1 1 1 1 1 1 1 ...
##  $ speech       : chr [1:3308] "all" "all" "all" "all" ...
##  $ dur_min      : num [1:3308] 3.55 3.55 6.57 6.57 4.71 ...
##  $ tokens_permin: num [1:3308] 8.46 42.57 5.32 21.75 12.31 ...
##  $ types_permin : num [1:3308] 4.79 19.73 2.59 9.89 3.61 ...
##  - attr(*, "spec")=
##   .. cols(
##   ..   X1 = col_double(),
##   ..   id = col_double(),
##   ..   rectime = col_double(),
##   ..   activity = col_character(),
##   ..   speaker = col_character(),
##   ..   tokens = col_double(),
##   ..   types = col_double(),
##   ..   segment_num = col_double(),
##   ..   language = col_character(),
##   ..   speech = col_character(),
##   ..   dur_min = col_double(),
##   ..   tokens_permin = col_double(),
##   ..   types_permin = col_double()
##   .. )
# Confirm mlu columns and factor coding after the recode above
str(mlu)
## spec_tbl_df[,9] [3,002 × 9] (S3: spec_tbl_df/tbl_df/tbl/data.frame)
##  $ id         : Factor w/ 90 levels "7292","7352",..: 46 46 46 46 46 46 46 46 46 46 ...
##  $ activity   : Factor w/ 7 levels "books","play",..: 6 6 5 5 7 7 2 2 6 6 ...
##  $ speaker    : chr [1:3002] "ADULTS" "CHI" "ADULTS" "CHI" ...
##  $ segment_num: num [1:3002] 2 2 2 2 2 2 2 2 3 3 ...
##  $ words_sum  : num [1:3002] 210 66 175 43 11 16 189 47 261 78 ...
##  $ num_utt_sum: num [1:3002] 66 35 64 24 2 12 64 28 87 43 ...
##  $ mlu_w      : num [1:3002] 3.18 1.89 2.73 1.79 5.5 ...
##  $ language   : Factor w/ 2 levels "english","spanish": 1 1 1 1 1 1 1 1 1 1 ...
##  $ speech     : chr [1:3002] "all" "all" "all" "all" ...
##  - attr(*, "spec")=
##   .. cols(
##   ..   id = col_double(),
##   ..   activity = col_character(),
##   ..   speaker = col_character(),
##   ..   segment_num = col_double(),
##   ..   words_sum = col_double(),
##   ..   num_utt_sum = col_double(),
##   ..   mlu_w = col_double(),
##   ..   language = col_character(),
##   ..   speech = col_character()
##   .. )
# Confirm chip columns and factor coding after filtering out ODS
str(chip)
## spec_tbl_df[,11] [1,118 × 11] (S3: spec_tbl_df/tbl_df/tbl/data.frame)
##  $ activity                         : Factor w/ 7 levels "books","play",..: 6 5 2 6 5 4 6 5 4 2 ...
##  $ id                               : Factor w/ 90 levels "7292","7352",..: 46 46 46 46 46 46 46 46 46 46 ...
##  $ rectime                          : num [1:1118] 15242 15242 15242 14342 14342 ...
##  $ total_adult_utt                  : num [1:1118] 68 64 65 91 43 13 50 8 65 127 ...
##  $ total_child_utt                  : num [1:1118] 46 34 33 54 17 3 14 1 29 49 ...
##  $ total_adult_resp                 : num [1:1118] 62 51 54 77 24 9 30 4 56 106 ...
##  $ total_adult_imitexp              : num [1:1118] 18 13 15 25 5 2 9 0 16 21 ...
##  $ prop_adultresp_outof_childutt    : num [1:1118] 1.35 1.5 1.64 1.43 1.41 ...
##  $ prop_adult_imitexp_outof_childutt: num [1:1118] 0.391 0.382 0.455 0.463 0.294 ...
##  $ language                         : Factor w/ 2 levels "english","spanish": 1 1 1 1 1 1 1 1 1 1 ...
##  $ segment_num                      : num [1:1118] 2 2 2 3 3 3 4 4 4 5 ...
##  - attr(*, "spec")=
##   .. cols(
##   ..   activity = col_character(),
##   ..   id = col_double(),
##   ..   rectime = col_double(),
##   ..   total_adult_utt = col_double(),
##   ..   total_child_utt = col_double(),
##   ..   total_adult_resp = col_double(),
##   ..   total_adult_imitexp = col_double(),
##   ..   prop_adultresp_outof_childutt = col_double(),
##   ..   prop_adult_imitexp_outof_childutt = col_double(),
##   ..   language = col_character(),
##   ..   segment_num = col_double()
##   .. )

Create dfs for ADULTS

# FREQ
freq_adult_en <- freq %>% 
  filter(speaker == "ADULTS", language == "english")

freq_adult_sp <- freq %>% 
  filter(speaker == "ADULTS", language == "spanish")


# MLU
mlu_adult_en <- mlu %>% 
  filter(speaker == "ADULTS", language == "english")

mlu_adult_sp <- mlu %>% 
  filter(speaker == "ADULTS", language == "spanish")

Create dfs averaging across segments for one obs per activity

Tokens and Types (rate per min)

# tokens and types: average per activity
# tokens and types: average across segments -> one observation per
# caregiver (id) x activity cell.
# FIX: replaces the group_by + mutate + distinct anti-pattern with the
# idiomatic summarise(); same columns/values (rows now sorted by group keys,
# which is irrelevant for the downstream correlations).
freq_adult_per_activity_id_en <- freq_adult_en %>% 
  group_by(id, activity, language) %>% 
  summarise(tokens_permin_avg_act = mean(tokens_permin), 
            types_permin_avg_act = mean(types_permin), 
            .groups = "drop") %>% 
  mutate(activity = factor(activity, levels = c("books", "play", "food", 
                                                "routines", "conv", "ac", "non_tcds")))

freq_adult_per_activity_id_sp <- freq_adult_sp %>% 
  group_by(id, activity, language) %>% 
  summarise(tokens_permin_avg_act = mean(tokens_permin), 
            types_permin_avg_act = mean(types_permin), 
            .groups = "drop") %>% 
  mutate(activity = factor(activity, levels = c("books", "play", "food", 
                                                "routines", "conv", "ac", "non_tcds")))

MLUw

# mlu: average per activity
# mlu: average across segments -> one observation per id x activity cell.
# FIX: idiomatic summarise() instead of group_by + mutate + distinct.
mlu_adult_per_activity_id_en <- mlu_adult_en %>% 
  group_by(id, activity, language) %>% 
  summarise(mluw_avg_act = mean(mlu_w), .groups = "drop") %>% 
  mutate(activity = factor(activity, levels = c("books", "play", "food", 
                                                "routines", "conv", "ac", "non_tcds")))

mlu_adult_per_activity_id_sp <- mlu_adult_sp %>% 
  group_by(id, activity, language) %>% 
  summarise(mluw_avg_act = mean(mlu_w), .groups = "drop") %>% 
  mutate(activity = factor(activity, levels = c("books", "play", "food", 
                                                "routines", "conv", "ac", "non_tcds")))

Responses and Imitations/Expansions

# chip: average per activity
# chip: average across segments -> one observation per id x activity cell.
# FIXES: (1) idiomatic summarise() replaces group_by + mutate + distinct;
# (2) the result is now explicitly ungrouped (.groups = "drop") — distinct()
# previously left the grouping in place; (3) na.rm = TRUE spelled out
# (T is reassignable in R).
chip_per_activity_id_en <- chip %>% 
  filter(language == "english") %>% 
  group_by(id, activity, language) %>% 
  summarise(prop_adultresp_avg_act = mean(prop_adultresp_outof_childutt, na.rm = TRUE), 
            prop_adult_imitexp_avg_act = mean(prop_adult_imitexp_outof_childutt, na.rm = TRUE), 
            .groups = "drop")


chip_per_activity_id_sp <- chip %>% 
  filter(language == "spanish") %>% 
  group_by(id, activity, language) %>% 
  summarise(prop_adultresp_avg_act = mean(prop_adultresp_outof_childutt, na.rm = TRUE), 
            prop_adult_imitexp_avg_act = mean(prop_adult_imitexp_outof_childutt, na.rm = TRUE), 
            .groups = "drop")

Create wide dfs for matrices - tokens and types (rate)

# tokens
# all
tokens_mtx_rate_en <- freq_adult_per_activity_id_en %>% 
  # one row per caregiver, one column per activity (tokens/min averages)
  pivot_wider(id_cols = c(id, language), 
              names_from = activity, 
              values_from = tokens_permin_avg_act) %>% 
  dplyr::select(books, play, food, routines, conv, ac, non_tcds)

tokens_mtx_rate_sp <- freq_adult_per_activity_id_sp %>% 
  pivot_wider(id_cols = c(id, language), 
              names_from = activity, 
              values_from = tokens_permin_avg_act) %>% 
  dplyr::select(books, play, food, routines, conv, ac, non_tcds)


# types
# all
types_mtx_rate_en <- freq_adult_per_activity_id_en %>% 
  pivot_wider(id_cols = c(id, language), 
              names_from = activity, 
              values_from = types_permin_avg_act) %>% 
  dplyr::select(books, play, food, routines, conv, ac, non_tcds)

types_mtx_rate_sp <- freq_adult_per_activity_id_sp %>% 
  pivot_wider(id_cols = c(id, language), 
              names_from = activity, 
              values_from = types_permin_avg_act) %>% 
  dplyr::select(books, play, food, routines, conv, ac, non_tcds)

Create wide dfs for matrices - mluw

# all
# one row per caregiver, one column per activity (MLUw averages)
mlu_mtx_en <- mlu_adult_per_activity_id_en %>% 
  pivot_wider(id_cols = c(id, language), 
              names_from = activity, 
              values_from = mluw_avg_act) %>% 
  dplyr::select(books, play, food, routines, conv, ac, non_tcds)


mlu_mtx_sp <- mlu_adult_per_activity_id_sp %>% 
  pivot_wider(id_cols = c(id, language), 
              names_from = activity, 
              values_from = mluw_avg_act) %>% 
  dplyr::select(books, play, food, routines, conv, ac, non_tcds)

Create wide dfs for matrix - responses and imit/exp

# prop responses
propresp_mtx_en <- chip_per_activity_id_en %>% 
  # ungroup first: the per-activity df may carry grouping from upstream
  ungroup() %>% 
  pivot_wider(id_cols = c(id, language), 
              names_from = activity, 
              values_from = prop_adultresp_avg_act) %>% 
  # chip has no non_tcds observations, so only the six tCDS activities
  dplyr::select(books, play, food, routines, conv, ac)

propresp_mtx_sp <- chip_per_activity_id_sp %>% 
  ungroup() %>% 
  pivot_wider(id_cols = c(id, language), 
              names_from = activity, 
              values_from = prop_adultresp_avg_act) %>% 
  dplyr::select(books, play, food, routines, conv, ac)



# prop imitations and expansions
propimitexp_mtx_en <- chip_per_activity_id_en %>% 
  ungroup() %>% 
  pivot_wider(id_cols = c(id, language), 
              names_from = activity, 
              values_from = prop_adult_imitexp_avg_act) %>% 
  dplyr::select(books, play, food, routines, conv, ac)

propimitexp_mtx_sp <- chip_per_activity_id_sp %>% 
  ungroup() %>% 
  pivot_wider(id_cols = c(id, language), 
              names_from = activity, 
              values_from = prop_adult_imitexp_avg_act) %>% 
  dplyr::select(books, play, food, routines, conv, ac)

Correlation Matrices - PER language

Tokens - Rate

# english
# Pair plot across activities: each point is one caregiver's average
# tokens/min in two activities; upper triangle shows Pearson r.
ggpairs(data = tokens_mtx_rate_en, 
        columns = 1:7, 
        switch = 'y', 
        lower = list(continuous = my_custom_smooth),
        upper = list(continuous = wrap("cor", size = 7)),
        title = "English - Tokens rate") + 
  theme_classic() + 
  theme(text= element_text(size = 18),
        strip.placement = "outside",
        strip.text.y = element_text(face = "bold", size = 15), 
        strip.text.x = element_text(face = "bold", size = 15)) 

# correlation matrices
# rcorr reports r, pairwise-complete n, and p for each activity pair
rcorr(as.matrix(tokens_mtx_rate_en), type = c("pearson"))
##          books play  food routines conv   ac non_tcds
## books     1.00 0.50  0.28     0.29 0.47 0.25     0.38
## play      0.50 1.00  0.13     0.38 0.45 0.32     0.50
## food      0.28 0.13  1.00    -0.19 0.12 0.05     0.02
## routines  0.29 0.38 -0.19     1.00 0.29 0.05     0.40
## conv      0.47 0.45  0.12     0.29 1.00 0.16     0.46
## ac        0.25 0.32  0.05     0.05 0.16 1.00     0.14
## non_tcds  0.38 0.50  0.02     0.40 0.46 0.14     1.00
## 
## n
##          books play food routines conv ac non_tcds
## books       22   19   15       14   21 22       22
## play        19   39   27       27   37 39       39
## food        15   27   31       22   30 31       31
## routines    14   27   22       32   30 32       32
## conv        21   37   30       30   43 43       43
## ac          22   39   31       32   43 45       45
## non_tcds    22   39   31       32   43 45       45
## 
## P
##          books  play   food   routines conv   ac     non_tcds
## books           0.0299 0.3132 0.3169   0.0323 0.2553 0.0838  
## play     0.0299        0.5110 0.0524   0.0054 0.0499 0.0012  
## food     0.3132 0.5110        0.3871   0.5188 0.7813 0.9357  
## routines 0.3169 0.0524 0.3871          0.1261 0.7711 0.0244  
## conv     0.0323 0.0054 0.5188 0.1261          0.3047 0.0018  
## ac       0.2553 0.0499 0.7813 0.7711   0.3047        0.3659  
## non_tcds 0.0838 0.0012 0.9357 0.0244   0.0018 0.3659
# spanish
# Same pair plot and correlation matrix as above, Spanish sample
ggpairs(data = tokens_mtx_rate_sp, 
        columns = 1:7, 
        switch = 'y', 
        lower = list(continuous = my_custom_smooth),
        upper = list(continuous = wrap("cor", size = 7)),
        title = "Spanish - Tokens rate") + 
  theme_classic() + 
  theme(text= element_text(size = 18),
        strip.placement = "outside",
        strip.text.y = element_text(face = "bold", size = 15), 
        strip.text.x = element_text(face = "bold", size = 15)) 

# correlation matrices
rcorr(as.matrix(tokens_mtx_rate_sp), type = c("pearson"))
##          books play  food routines  conv    ac non_tcds
## books     1.00 0.58 -0.21     0.72  0.61 -0.05     0.35
## play      0.58 1.00  0.14     0.18  0.53  0.23     0.25
## food     -0.21 0.14  1.00     0.12 -0.01  0.16     0.19
## routines  0.72 0.18  0.12     1.00  0.52  0.43     0.51
## conv      0.61 0.53 -0.01     0.52  1.00  0.29     0.47
## ac       -0.05 0.23  0.16     0.43  0.29  1.00     0.43
## non_tcds  0.35 0.25  0.19     0.51  0.47  0.43     1.00
## 
## n
##          books play food routines conv ac non_tcds
## books       20   15   11       15   20 20       20
## play        15   37   26       29   36 37       37
## food        11   26   31       24   29 31       31
## routines    15   29   24       35   35 35       35
## conv        20   36   29       35   43 43       43
## ac          20   37   31       35   43 45       45
## non_tcds    20   37   31       35   43 45       45
## 
## P
##          books  play   food   routines conv   ac     non_tcds
## books           0.0236 0.5356 0.0025   0.0040 0.8235 0.1327  
## play     0.0236        0.5001 0.3461   0.0009 0.1663 0.1315  
## food     0.5356 0.5001        0.5896   0.9702 0.3956 0.2975  
## routines 0.0025 0.3461 0.5896          0.0013 0.0104 0.0016  
## conv     0.0040 0.0009 0.9702 0.0013          0.0632 0.0017  
## ac       0.8235 0.1663 0.3956 0.0104   0.0632        0.0034  
## non_tcds 0.1327 0.1315 0.2975 0.0016   0.0017 0.0034

Types - Rate

# english
# Pair plot of average types/min per activity (English sample)
ggpairs(data = types_mtx_rate_en, 
        columns = 1:7, 
        switch = 'y', 
        lower = list(continuous = my_custom_smooth),
        upper = list(continuous = wrap("cor", size = 7)),
        title = "English - Types rate") + 
  theme_classic() + 
  theme(text= element_text(size = 18),
        strip.placement = "outside",
        strip.text.y = element_text(face = "bold", size = 15), 
        strip.text.x = element_text(face = "bold", size = 15)) 

# correlation matrices
rcorr(as.matrix(types_mtx_rate_en), type = c("pearson"))
##          books  play  food routines  conv   ac non_tcds
## books     1.00  0.35 -0.22     0.40  0.25 0.14     0.02
## play      0.35  1.00 -0.07    -0.02  0.19 0.22     0.34
## food     -0.22 -0.07  1.00    -0.12  0.13 0.11    -0.03
## routines  0.40 -0.02 -0.12     1.00 -0.02 0.00     0.10
## conv      0.25  0.19  0.13    -0.02  1.00 0.01     0.37
## ac        0.14  0.22  0.11     0.00  0.01 1.00     0.17
## non_tcds  0.02  0.34 -0.03     0.10  0.37 0.17     1.00
## 
## n
##          books play food routines conv ac non_tcds
## books       22   19   15       14   21 22       22
## play        19   39   27       27   37 39       39
## food        15   27   31       22   30 31       31
## routines    14   27   22       32   30 32       32
## conv        21   37   30       30   43 43       43
## ac          22   39   31       32   43 45       45
## non_tcds    22   39   31       32   43 45       45
## 
## P
##          books  play   food   routines conv   ac     non_tcds
## books           0.1473 0.4409 0.1537   0.2669 0.5270 0.9463  
## play     0.1473        0.7287 0.9017   0.2520 0.1885 0.0315  
## food     0.4409 0.7287        0.5926   0.4843 0.5418 0.8581  
## routines 0.1537 0.9017 0.5926          0.9035 0.9976 0.5857  
## conv     0.2669 0.2520 0.4843 0.9035          0.9523 0.0136  
## ac       0.5270 0.1885 0.5418 0.9976   0.9523        0.2589  
## non_tcds 0.9463 0.0315 0.8581 0.5857   0.0136 0.2589
# spanish
# Pair plot of average types/min per activity (Spanish sample)
ggpairs(data = types_mtx_rate_sp, 
        columns = 1:7, 
        switch = 'y', 
        lower = list(continuous = my_custom_smooth),
        upper = list(continuous = wrap("cor", size = 7)),
        title = "Spanish - Types rate") + 
  theme_classic() + 
  theme(text= element_text(size = 18),
        strip.placement = "outside",
        strip.text.y = element_text(face = "bold", size = 15), 
        strip.text.x = element_text(face = "bold", size = 15)) 

# correlation matrices
rcorr(as.matrix(types_mtx_rate_sp), type = c("pearson"))
##          books play  food routines conv    ac non_tcds
## books     1.00 0.37 -0.27     0.34 0.34 -0.22     0.01
## play      0.37 1.00  0.47     0.04 0.41  0.10     0.18
## food     -0.27 0.47  1.00     0.14 0.10  0.05     0.29
## routines  0.34 0.04  0.14     1.00 0.11  0.00     0.12
## conv      0.34 0.41  0.10     0.11 1.00  0.05     0.47
## ac       -0.22 0.10  0.05     0.00 0.05  1.00     0.22
## non_tcds  0.01 0.18  0.29     0.12 0.47  0.22     1.00
## 
## n
##          books play food routines conv ac non_tcds
## books       20   15   11       15   20 20       20
## play        15   37   26       29   36 37       37
## food        11   26   31       24   29 31       31
## routines    15   29   24       35   35 35       35
## conv        20   36   29       35   43 43       43
## ac          20   37   31       35   43 45       45
## non_tcds    20   37   31       35   43 45       45
## 
## P
##          books  play   food   routines conv   ac     non_tcds
## books           0.1787 0.4281 0.2174   0.1426 0.3414 0.9587  
## play     0.1787        0.0165 0.8523   0.0127 0.5708 0.2926  
## food     0.4281 0.0165        0.5153   0.6012 0.7788 0.1105  
## routines 0.2174 0.8523 0.5153          0.5313 0.9922 0.4758  
## conv     0.1426 0.0127 0.6012 0.5313          0.7391 0.0014  
## ac       0.3414 0.5708 0.7788 0.9922   0.7391        0.1528  
## non_tcds 0.9587 0.2926 0.1105 0.4758   0.0014 0.1528

MLUw

# english
# Pair plot of average MLUw per activity (English sample); larger text
# sizes here than in the tokens/types plots
ggpairs(data = mlu_mtx_en, 
        columns = 1:7, 
        switch = 'y', 
        lower = list(continuous = my_custom_smooth),
        upper = list(continuous = wrap("cor", size = 11)),
        title = "English - MLUw") + 
  theme_classic() + 
  theme(text= element_text(size = 26),
        strip.placement = "outside",
        strip.text.y = element_text(face = "bold", size = 20), 
        strip.text.x = element_text(face = "bold", size = 20)) 

# correlation matrices
rcorr(as.matrix(mlu_mtx_en), type = c("pearson"))
##          books play food routines conv   ac non_tcds
## books     1.00 0.35 0.55    -0.02 0.50 0.21     0.00
## play      0.35 1.00 0.54     0.32 0.42 0.32     0.33
## food      0.55 0.54 1.00     0.56 0.68 0.53     0.56
## routines -0.02 0.32 0.56     1.00 0.28 0.20     0.40
## conv      0.50 0.42 0.68     0.28 1.00 0.50     0.34
## ac        0.21 0.32 0.53     0.20 0.50 1.00     0.18
## non_tcds  0.00 0.33 0.56     0.40 0.34 0.18     1.00
## 
## n
##          books play food routines conv ac non_tcds
## books       22   19   15       14   21 22       22
## play        19   39   27       27   37 39       39
## food        15   27   31       22   30 31       31
## routines    14   27   22       32   30 32       32
## conv        21   37   30       30   43 43       43
## ac          22   39   31       32   43 45       45
## non_tcds    22   39   31       32   43 45       45
## 
## P
##          books  play   food   routines conv   ac     non_tcds
## books           0.1364 0.0338 0.9410   0.0200 0.3399 0.9945  
## play     0.1364        0.0036 0.1046   0.0102 0.0473 0.0431  
## food     0.0338 0.0036        0.0069   0.0000 0.0020 0.0011  
## routines 0.9410 0.1046 0.0069          0.1347 0.2690 0.0221  
## conv     0.0200 0.0102 0.0000 0.1347          0.0006 0.0254  
## ac       0.3399 0.0473 0.0020 0.2690   0.0006        0.2386  
## non_tcds 0.9945 0.0431 0.0011 0.0221   0.0254 0.2386
# spanish
# Pair plot of average MLUw per activity (Spanish sample)
ggpairs(data = mlu_mtx_sp, 
        columns = 1:7, 
        switch = 'y', 
        lower = list(continuous = my_custom_smooth),
        upper = list(continuous = wrap("cor", size = 11)),
        title = "Spanish - MLUw") + 
  theme_classic() + 
  theme(text= element_text(size = 26),
        strip.placement = "outside",
        strip.text.y = element_text(face = "bold", size = 20), 
        strip.text.x = element_text(face = "bold", size = 20)) 

# correlation matrices
rcorr(as.matrix(mlu_mtx_sp), type = c("pearson"))
##          books play  food routines conv   ac non_tcds
## books     1.00 0.66 -0.23     0.42 0.54 0.37     0.48
## play      0.66 1.00  0.29     0.25 0.54 0.41     0.47
## food     -0.23 0.29  1.00     0.25 0.14 0.36     0.25
## routines  0.42 0.25  0.25     1.00 0.49 0.70     0.47
## conv      0.54 0.54  0.14     0.49 1.00 0.52     0.32
## ac        0.37 0.41  0.36     0.70 0.52 1.00     0.45
## non_tcds  0.48 0.47  0.25     0.47 0.32 0.45     1.00
## 
## n
##          books play food routines conv ac non_tcds
## books       20   15   11       15   20 20       20
## play        15   37   26       29   36 37       37
## food        11   26   31       24   29 31       31
## routines    15   29   24       35   35 35       35
## conv        20   36   29       35   43 43       43
## ac          20   37   31       35   43 45       45
## non_tcds    20   37   31       35   43 45       45
## 
## P
##          books  play   food   routines conv   ac     non_tcds
## books           0.0079 0.4894 0.1228   0.0139 0.1105 0.0333  
## play     0.0079        0.1490 0.1852   0.0006 0.0108 0.0031  
## food     0.4894 0.1490        0.2335   0.4586 0.0460 0.1735  
## routines 0.1228 0.1852 0.2335          0.0031 0.0000 0.0047  
## conv     0.0139 0.0006 0.4586 0.0031          0.0003 0.0355  
## ac       0.1105 0.0108 0.0460 0.0000   0.0003        0.0017  
## non_tcds 0.0333 0.0031 0.1735 0.0047   0.0355 0.0017

PROP RESPONSES

# english
# Pair plot of proportion of adult responses per activity (6 tCDS
# activities only; chip has no non_tcds column)
ggpairs(data = propresp_mtx_en, 
        columns = 1:6, 
        switch = 'y', 
        lower = list(continuous = my_custom_smooth),
        upper = list(continuous = wrap("cor", size = 7)),
        title = "English - Prop Resp") + 
  theme_classic() + 
  theme(text= element_text(size = 18),
        strip.placement = "outside",
        strip.text.y = element_text(face = "bold", size = 15), 
        strip.text.x = element_text(face = "bold", size = 15)) 

# correlation matrices
rcorr(as.matrix(propresp_mtx_en), type = c("pearson"))
##          books  play  food routines  conv    ac
## books     1.00  0.50  0.36     0.03  0.34  0.29
## play      0.50  1.00  0.23     0.28  0.17 -0.14
## food      0.36  0.23  1.00    -0.23  0.04  0.05
## routines  0.03  0.28 -0.23     1.00  0.55 -0.05
## conv      0.34  0.17  0.04     0.55  1.00 -0.05
## ac        0.29 -0.14  0.05    -0.05 -0.05  1.00
## 
## n
##          books play food routines conv ac
## books       22   19   15       12   21 20
## play        19   39   26       26   36 38
## food        15   26   30       20   28 28
## routines    12   26   20       29   27 27
## conv        21   36   28       27   41 39
## ac          20   38   28       27   39 43
## 
## P
##          books  play   food   routines conv   ac    
## books           0.0292 0.1871 0.9285   0.1260 0.2094
## play     0.0292        0.2540 0.1705   0.3234 0.3899
## food     0.1871 0.2540        0.3265   0.8262 0.7937
## routines 0.9285 0.1705 0.3265          0.0033 0.8048
## conv     0.1260 0.3234 0.8262 0.0033          0.7845
## ac       0.2094 0.3899 0.7937 0.8048   0.7845
# spanish
# Pair plot of proportion of adult responses per activity (Spanish sample)
ggpairs(data = propresp_mtx_sp, 
        columns = 1:6, 
        switch = 'y', 
        lower = list(continuous = my_custom_smooth),
        upper = list(continuous = wrap("cor", size = 7)),
        title = "Spanish - Prop Resp") + 
  theme_classic() + 
  theme(text= element_text(size = 18),
        strip.placement = "outside",
        strip.text.y = element_text(face = "bold", size = 15), 
        strip.text.x = element_text(face = "bold", size = 15)) 

# correlation matrices
rcorr(as.matrix(propresp_mtx_sp), type = c("pearson"))
##          books  play  food routines conv   ac
## books     1.00  0.21 -0.31     0.42 0.41 0.03
## play      0.21  1.00  0.40    -0.04 0.23 0.37
## food     -0.31  0.40  1.00     0.62 0.35 0.43
## routines  0.42 -0.04  0.62     1.00 0.25 0.14
## conv      0.41  0.23  0.35     0.25 1.00 0.23
## ac        0.03  0.37  0.43     0.14 0.23 1.00
## 
## n
##          books play food routines conv ac
## books       20   15   10       15   19 19
## play        15   36   24       28   33 35
## food        10   24   30       23   27 29
## routines    15   28   23       35   35 34
## conv        19   33   27       35   41 40
## ac          19   35   29       34   40 44
## 
## P
##          books  play   food   routines conv   ac    
## books           0.4630 0.3912 0.1211   0.0851 0.9106
## play     0.4630        0.0523 0.8211   0.1893 0.0269
## food     0.3912 0.0523        0.0015   0.0747 0.0193
## routines 0.1211 0.8211 0.0015          0.1418 0.4344
## conv     0.0851 0.1893 0.0747 0.1418          0.1602
## ac       0.9106 0.0269 0.0193 0.4344   0.1602

PROP IMIT and EXP

# english
# Pair plot of proportion of adult imitations/expansions per activity
# (English sample)
ggpairs(data = propimitexp_mtx_en, 
        columns = 1:6, 
        switch = 'y', 
        lower = list(continuous = my_custom_smooth),
        upper = list(continuous = wrap("cor", size = 7)),
        title = "English - Prop Imit/Exp") + 
  theme_classic() + 
  theme(text= element_text(size = 18),
        strip.placement = "outside",
        strip.text.y = element_text(face = "bold", size = 15), 
        strip.text.x = element_text(face = "bold", size = 15)) 

# correlation matrices
rcorr(as.matrix(propimitexp_mtx_en), type = c("pearson"))
##          books play  food routines  conv    ac
## books     1.00 0.44  0.02    -0.05  0.40  0.09
## play      0.44 1.00  0.03     0.34  0.39  0.06
## food      0.02 0.03  1.00    -0.23 -0.06 -0.01
## routines -0.05 0.34 -0.23     1.00  0.47  0.16
## conv      0.40 0.39 -0.06     0.47  1.00  0.03
## ac        0.09 0.06 -0.01     0.16  0.03  1.00
## 
## n
##          books play food routines conv ac
## books       22   19   15       12   21 20
## play        19   39   26       26   36 38
## food        15   26   30       20   28 28
## routines    12   26   20       29   27 27
## conv        21   36   28       27   41 39
## ac          20   38   28       27   39 43
## 
## P
##          books  play   food   routines conv   ac    
## books           0.0606 0.9369 0.8760   0.0738 0.7026
## play     0.0606        0.8972 0.0849   0.0196 0.7260
## food     0.9369 0.8972        0.3339   0.7604 0.9604
## routines 0.8760 0.0849 0.3339          0.0126 0.4247
## conv     0.0738 0.0196 0.7604 0.0126          0.8374
## ac       0.7026 0.7260 0.9604 0.4247   0.8374
# spanish
# FIX: ggpairs previously plotted the ENGLISH matrix (propimitexp_mtx_en)
# while the rcorr() below used the Spanish one; the plot now uses the
# Spanish data so it matches the reported statistics and the title.
ggpairs(data = propimitexp_mtx_sp, 
        columns = 1:6, 
        switch = 'y', 
        lower = list(continuous = my_custom_smooth),
        upper = list(continuous = wrap("cor", size = 7)),
        title = "Spanish - Prop Imit/Exp") + 
  theme_classic() + 
  theme(text= element_text(size = 18),
        strip.placement = "outside",
        strip.text.y = element_text(face = "bold", size = 15), 
        strip.text.x = element_text(face = "bold", size = 15)) 

# correlation matrices
rcorr(as.matrix(propimitexp_mtx_sp), type = c("pearson"))
##          books play  food routines conv    ac
## books     1.00 0.32 -0.48     0.48 0.09 -0.09
## play      0.32 1.00  0.39     0.09 0.43  0.32
## food     -0.48 0.39  1.00     0.11 0.38  0.40
## routines  0.48 0.09  0.11     1.00 0.24  0.20
## conv      0.09 0.43  0.38     0.24 1.00  0.26
## ac       -0.09 0.32  0.40     0.20 0.26  1.00
## 
## n
##          books play food routines conv ac
## books       20   15   10       15   19 19
## play        15   36   24       28   33 35
## food        10   24   30       23   27 29
## routines    15   28   23       35   35 34
## conv        19   33   27       35   41 40
## ac          19   35   29       34   40 44
## 
## P
##          books  play   food   routines conv   ac    
## books           0.2419 0.1630 0.0681   0.7109 0.7044
## play     0.2419        0.0579 0.6506   0.0136 0.0640
## food     0.1630 0.0579        0.6231   0.0518 0.0303
## routines 0.0681 0.6506 0.6231          0.1705 0.2450
## conv     0.7109 0.0136 0.0518 0.1705          0.0985
## ac       0.7044 0.0640 0.0303 0.2450   0.0985